Compare commits
75 Commits
master
...
heading-co
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6e50d9f013 | ||
|
|
065b298d1f | ||
|
|
fe2b7d065d | ||
|
|
2727cf1821 | ||
|
|
6c77594daf | ||
|
|
8d35a273fd | ||
|
|
cc1064a40a | ||
|
|
17c8927deb | ||
|
|
0ba76a1203 | ||
|
|
bc7525e353 | ||
|
|
0cf8f6791d | ||
|
|
62d8f378d5 | ||
|
|
b02cbafcc0 | ||
|
|
df2b2eb9f7 | ||
|
|
6058dc62b4 | ||
|
|
4991c72cec | ||
|
|
e876a487f5 | ||
|
|
f6d289b223 | ||
|
|
a2cef24f3b | ||
|
|
19bb7ccd7b | ||
|
|
c273945d4d | ||
|
|
084594a7d3 | ||
|
|
51b34e6a27 | ||
|
|
19d2cae392 | ||
|
|
67e5a44d49 | ||
|
|
c5eaf29b09 | ||
|
|
197862569e | ||
|
|
43ece69d2e | ||
|
|
2721a242ce | ||
|
|
69b2ce8f0a | ||
|
|
4679cda441 | ||
|
|
a64d9d7d9a | ||
|
|
4e7c855995 | ||
|
|
eb980259cb | ||
|
|
ddac47dbaa | ||
|
|
7d2c45786f | ||
|
|
ef2fbb9a16 | ||
|
|
ffb2a08055 | ||
|
|
23b05f2366 | ||
|
|
0a3812c83d | ||
|
|
b70c2f3519 | ||
|
|
178aeec565 | ||
|
|
570ffeb100 | ||
|
|
214288c1c3 | ||
|
|
c3389abb59 | ||
|
|
93aba40e00 | ||
|
|
447a3dcefc | ||
|
|
a9ce7f2963 | ||
|
|
2ee602c0b3 | ||
|
|
507c05592e | ||
|
|
b11bbc5058 | ||
|
|
fd0382f3a0 | ||
|
|
9cdcef6d71 | ||
|
|
1a4f665639 | ||
|
|
e6fff0165e | ||
|
|
d873288680 | ||
|
|
26f8baacfb | ||
|
|
f60ddaf19d | ||
|
|
3bbf6834c3 | ||
|
|
80c911e41f | ||
|
|
018e28bc84 | ||
|
|
405bc8ecc3 | ||
|
|
1cbb52f4c4 | ||
|
|
5f077eb1e0 | ||
|
|
48802f0c8d | ||
|
|
ccd1e612ae | ||
|
|
ceddd4abb9 | ||
|
|
1f4bd5d60e | ||
|
|
e5bf85a551 | ||
|
|
b8ab82f95c | ||
|
|
f154a6c9ec | ||
|
|
56d1b625ee | ||
|
|
892eec10d5 | ||
|
|
9b0ad40f55 | ||
|
|
e0722edb57 |
16
TODO.org
16
TODO.org
@@ -14,9 +14,6 @@
|
||||
:PROPERTIES:
|
||||
:CREATED: [2022-05-16 Mon 22:12]
|
||||
:END:
|
||||
:LOGBOOK:
|
||||
CLOCK: [2022-06-01 Wed 16:47]
|
||||
:END:
|
||||
|
||||
#+begin_src reason
|
||||
[@bs.module "react"]
|
||||
@@ -30,6 +27,19 @@ CLOCK: [2022-06-01 Wed 16:47]
|
||||
- [[https://blog.bitsrc.io/build-our-own-react-redux-using-usereducer-and-usecontext-hooks-a5574b526475][Build Your Own React-Redux Using useReducer and useContext Hooks | by Chidume Nnamdi | Bits and Pieces]]
|
||||
|
||||
** TODO Headline content parser
|
||||
:LOGBOOK:
|
||||
CLOCK: [2022-08-09 Tue 17:51]--[2022-08-09 Tue 19:51] => 2:00
|
||||
:END:
|
||||
|
||||
So I've had some heavy misconceptions while working on the content,
|
||||
I would preparse all content tokens, just to work over them, but that doesn't make sense.
|
||||
Now w
|
||||
|
||||
Currently I have a big problem that newline is not pure and interferes with the other function builders
|
||||
|
||||
[[file:src/org/org_block_heading.nim::let contentEndParser = choice(@\[][This]] doesnt work because newline is "impure"
|
||||
|
||||
Gonna convert everything to funcs.....
|
||||
** TODO Store the line numbers :PARSER:
|
||||
** TODO Tag parser :PARSER:
|
||||
** TODO Creating hot reload :DEV_ENVIRONMENT:
|
||||
|
||||
@@ -1,5 +1,14 @@
|
||||
#+TITLE: Spec
|
||||
|
||||
- dsfsd
|
||||
1. 23231o
|
||||
|
||||
#+begin_src
|
||||
|
||||
#+end_src
|
||||
|
||||
- sdfsdfsdf
|
||||
- sdfsdfsdf
|
||||
|
||||
* Specs
|
||||
|
||||
|
||||
@@ -33,11 +33,14 @@ let parseTodoKeyword = todoKeywords.createTodoKeywordParser()
|
||||
let parseDoneKeyword = doneKeywords.createTodoKeywordParser()
|
||||
|
||||
let parseContentText = @[
|
||||
anyUntil(choice(@[endOfStream, str("\n*")])),
|
||||
anyUntil(choice(@[endOfStream, str("\n")])),
|
||||
]
|
||||
|
||||
let parseHeadlineNewline = @[
|
||||
ignore(newline)
|
||||
choice(@[
|
||||
ch(NewLines),
|
||||
endOfStream,
|
||||
])
|
||||
]
|
||||
|
||||
let parseHeadingText = @[
|
||||
@@ -91,9 +94,9 @@ proc tryBuildHeading(builder: OrgBuilderResult): OrgBuilderResult =
|
||||
|
||||
(parseHeadingText, buildHeadlineContent, false),
|
||||
|
||||
(parseContentText, buildHeadlineChildren, true),
|
||||
# (parseContentText, buildHeadlineChildren, true),
|
||||
|
||||
(parseHeadlineNewline, buildHeadlineNewline, true),
|
||||
(parseHeadlineNewline, buildHeadlineNewline, false),
|
||||
]
|
||||
)
|
||||
|
||||
@@ -102,7 +105,8 @@ let headingParser = choice(@[
|
||||
endOfStream,
|
||||
])
|
||||
|
||||
let newlineParser: seq[Parser -> ParserResult] = @[newline + newline]
|
||||
|
||||
let contentEndParser = newline
|
||||
|
||||
let buildNewline = func(tokens: seq[ParserToken], org: OrgBlock): OrgBlock {.closure.} =
|
||||
OrgBlock(
|
||||
@@ -111,44 +115,63 @@ let buildNewline = func(tokens: seq[ParserToken], org: OrgBlock): OrgBlock {.clo
|
||||
|
||||
proc concatOrgBlock(xs: seq[OrgBlock], ys: seq[OrgBlock]): seq[OrgBlock] = xs & ys
|
||||
|
||||
let parseBlockText = @[
|
||||
anyUntil(choice(@[endOfStream, str("\n")])),
|
||||
]
|
||||
|
||||
let contentParsersSeq: seq[tuple[
|
||||
parsers: seq[Parser -> ParserResult],
|
||||
tokenFoldFn: (seq[ParserToken], OrgBlock) -> OrgBlock,
|
||||
ignoreEmpty: bool,
|
||||
# concatFn: (seq[OrgBlock], seq[OrgBlock]) -> seq[OrgBlock],
|
||||
]] = @[
|
||||
(newlineParser, buildNewline, false),
|
||||
# (newlineParser, buildNewline, false),
|
||||
(parseBlockText, buildNewline, true),
|
||||
(contentEndParser, buildNewline, true),
|
||||
]
|
||||
|
||||
proc parseHeadlineChildren(builder: OrgBuilder): OrgBuilderResult =
|
||||
var headingBlock = builder.tree[0]
|
||||
|
||||
var builderAcc: OrgBuilderResult = OrgBuilderResult.ok(builder)
|
||||
# echo "Builder " & $builder
|
||||
|
||||
# var builderAcc: OrgBuilderResult = OrgBuilderResult.ok(builder)
|
||||
var builderAcc: OrgBuilderResult = OrgBuilderResult.ok(OrgBuilder(
|
||||
(
|
||||
parser: builder.parser,
|
||||
tree: @[],
|
||||
)
|
||||
))
|
||||
|
||||
echo "builderAcc before parse content ", builderAcc
|
||||
# echo builderAcc.tryParser(headingParser)
|
||||
while builderAcc.isOk() and builderAcc.tryParser(headingParser).isErr():
|
||||
builderAcc = builderAcc
|
||||
.applyParsersSeqToSeq(
|
||||
contentParsersSeq
|
||||
)
|
||||
echo "builderAcc after parse in while loop ", builderAcc
|
||||
# echo builderAcc
|
||||
|
||||
builderAcc
|
||||
|
||||
# let content = builderAcc.fold(
|
||||
# (_err) => "",
|
||||
# (builder: OrgBuilder) => builder.tree,
|
||||
# )
|
||||
let content = builderAcc.fold(
|
||||
(err) => 0, # newSeq[OrgBlock](),
|
||||
(builder: OrgBuilder) => len builder.tree,
|
||||
)
|
||||
|
||||
# echo "builderAcc" & $builderAcc
|
||||
echo "content" & $content
|
||||
|
||||
# headingBlock.content = content
|
||||
|
||||
# let res = builderAcc.fold(
|
||||
# (_err) => OrgBuilderResult.err((builder, "Could not parse content")),
|
||||
# (builder: OrgBuilder) => OrgBuilderResult.ok((
|
||||
# parser: builder.parser.emptyTokens(),
|
||||
# tree: @[headingBlock],
|
||||
# ))
|
||||
# )
|
||||
let res = builderAcc.fold(
|
||||
(err) => OrgBuilderResult.err((builder, "Could not parse content")),
|
||||
(builder: OrgBuilder) => OrgBuilderResult.ok((
|
||||
parser: builder.parser.emptyTokens(),
|
||||
tree: @[headingBlock],
|
||||
))
|
||||
)
|
||||
|
||||
# res
|
||||
res
|
||||
|
||||
proc makeOrg*(x: string): OrgBuilderResult =
|
||||
var acc = initOrgBuilder(x)
|
||||
@@ -181,14 +204,19 @@ proc foldOrg*(x: OrgBuilderResult): string =
|
||||
# echo acc.unsafeGet().tree[^1]
|
||||
|
||||
when isMainModule:
|
||||
let test1 = """* TODO Level 1
|
||||
|
||||
Some text inbetween
|
||||
let test1 = """* TODO Level 1"""
|
||||
|
||||
let test2 = """* TODO Level 1
|
||||
Foo
|
||||
** DONE Level 2
|
||||
|
||||
Some more content
|
||||
"""
|
||||
|
||||
let acc = makeOrg(test1).foldOrg()
|
||||
echo acc
|
||||
let test3 = """* TODO Level 1
|
||||
Foo
|
||||
"""
|
||||
|
||||
let acc = makeOrg(test3).foldOrg()
|
||||
|
||||
discard acc
|
||||
|
||||
@@ -7,7 +7,7 @@ import fp/[
|
||||
maybe,
|
||||
option,
|
||||
]
|
||||
import ./org_builder
|
||||
import ./org_builder_api
|
||||
import ./org_types
|
||||
import ../utils/fp
|
||||
import ../parser/parser_internals
|
||||
|
||||
@@ -330,10 +330,14 @@ proc foldBuilder*[T, T2](
|
||||
|
||||
# -- Stringifiers
|
||||
|
||||
proc `$`*(x: ParserToken): string =
|
||||
&"""ParserToken(
|
||||
value: {tokenStringValue(x)},
|
||||
)"""
|
||||
func pprint*(x: ParserToken): string =
|
||||
let str = tokenStringValue(x)
|
||||
let escapedChar = if str == "\n": "\\n"
|
||||
else: str
|
||||
&"""ParserToken("{escapedChar}")
|
||||
"""
|
||||
|
||||
func `$`*(x: ParserToken): string = pprint(x)
|
||||
|
||||
const LEFT_HIGHLIGHT_CHAR = ">>"
|
||||
const RIGHT_HIGHLIGHT_CHAR = "<<"
|
||||
|
||||
79
src_v2/org/org_builder_api.nim
Normal file
79
src_v2/org/org_builder_api.nim
Normal file
@@ -0,0 +1,79 @@
|
||||
import std/[
|
||||
collections/sequtils,
|
||||
strformat,
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
resultM,
|
||||
]
|
||||
import ./org_types
|
||||
import ../parser/parser
|
||||
|
||||
# -- OrgInlineBlock.Builder.Type
|
||||
|
||||
type OrgInlineBuilderT* = OrgInlineBlock
|
||||
type OrgInlineBuilder* = Builder[OrgInlineBuilderT]
|
||||
type OrgInlineBuilderResult* = BuilderResult[OrgInlineBuilderT]
|
||||
|
||||
# -- OrgInlineBlock.Builder.Initializers
|
||||
|
||||
func initOrgInlineBuilder*(content: string): OrgInlineBuilderResult =
|
||||
return OrgInlineBuilderResult.ok(OrgInlineBuilder((
|
||||
parser: initParser(content),
|
||||
tree: newSeq[OrgInlineBuilderT](),
|
||||
)))
|
||||
|
||||
func tokenizeInlineTokens*(kind: orgInlineBlockKind): seq[ParserToken] -> seq[OrgInlineBuilderT] =
|
||||
return func(tokens: seq[ParserToken]): seq[OrgInlineBuilderT] =
|
||||
return @[
|
||||
OrgInlineBuilderT(
|
||||
kind: kind,
|
||||
content: tokens.tokensToString(),
|
||||
)
|
||||
]
|
||||
|
||||
func tokenizeRawText*(kind: orgInlineBlockKind): string -> OrgInlineBuilderT =
|
||||
return func(content: string): OrgInlineBuilderT =
|
||||
return OrgInlineBuilderT(
|
||||
kind: kind,
|
||||
content: content,
|
||||
)
|
||||
|
||||
# ## Blocks
|
||||
type OrgBuilderT* = OrgBlock
|
||||
type OrgBuilder* = Builder[OrgBuilderT]
|
||||
type OrgBuilderError* = BuilderError[OrgBuilderT]
|
||||
type OrgBuilderResult* = BuilderResult[OrgBuilderT]
|
||||
|
||||
func initOrgBuilder*(content: string): OrgBuilderResult =
|
||||
return OrgBuilderResult.ok(OrgBuilder((
|
||||
parser: initParser(content),
|
||||
tree: newSeq[OrgBuilderT](),
|
||||
)))
|
||||
|
||||
func pprint*(x: OrgBuilder): string =
|
||||
&"""OrgBuilder(
|
||||
parser: {x.parser}
|
||||
tree: {x.tree}
|
||||
)"""
|
||||
|
||||
func `$`*(x: OrgBuilder): string = pprint(x)
|
||||
|
||||
func pprint*(x: OrgBuilderError): string =
|
||||
&"""OrgBuilderError(
|
||||
parser: {x.parser}
|
||||
tree: {x.tree}
|
||||
)"""
|
||||
|
||||
func `$`*(x: OrgBuilderError): string = pprint(x)
|
||||
|
||||
# # proc orgBuilderConcat*(typeInfo: OrgBuilderT, concatFn: (ParserToken, OrgBuilderT) -> OrgBuilderT):
|
||||
# # (seq[ParserToken], OrgBuilderT) -> OrgBuilderT =
|
||||
# # return proc(xs: seq[ParserToken], builder: OrgBuilderT): OrgBuilderT =
|
||||
# # return xs.foldl(concatFn(b, a), typeInfo)
|
||||
|
||||
|
||||
# proc orgBuilderApply*(concatFn: (ParserToken, OrgBuilderT) -> OrgBuilderT):
|
||||
# (seq[ParserToken], OrgBuilderT) -> OrgBuilderT =
|
||||
# return proc(tokens: seq[ParserToken], builder: OrgBuilderT): OrgBuilderT =
|
||||
# tokens.foldl(concatFn(b, a), builder)
|
||||
218
src_v2/org/org_builder_heading.nim
Normal file
218
src_v2/org/org_builder_heading.nim
Normal file
@@ -0,0 +1,218 @@
|
||||
import std/[
|
||||
collections/sequtils,
|
||||
strformat,
|
||||
strutils,
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
option,
|
||||
resultM,
|
||||
]
|
||||
import fusion/matching
|
||||
{.experimental: "caseStmtMacros".}
|
||||
import ./org_types
|
||||
import ./org_builder_api
|
||||
import ../parser/parser
|
||||
import ../utils/fp
|
||||
|
||||
# -- Parsers
|
||||
|
||||
let headingStarsParser* = following(@[
|
||||
ignore(optional(newline)),
|
||||
manyUntil(ch('*'), ch(' ')),
|
||||
ignore(space)
|
||||
])
|
||||
|
||||
proc createTodoKeywordParser(xs: seq[string]): parserFnT =
|
||||
choice(xs.map((x: string) => str(x) + ignore(space)))
|
||||
|
||||
let todoKeywords = @["TODO"]
|
||||
let doneKeywords = @["DONE"]
|
||||
|
||||
let parseTodoKeyword = todoKeywords.createTodoKeywordParser()
|
||||
let parseDoneKeyword = doneKeywords.createTodoKeywordParser()
|
||||
|
||||
# let parseContentText = @[
|
||||
# anyUntil(choice(@[endOfStream, str("\n")])),
|
||||
# ]
|
||||
|
||||
# let parseHeadlineNewline = @[
|
||||
# choice(@[
|
||||
# ch(NewLines),
|
||||
# endOfStream,
|
||||
# ])
|
||||
# ]
|
||||
|
||||
# let parseHeadingText = @[
|
||||
# anyUntil(newline),
|
||||
# ]
|
||||
|
||||
# -- Tokenizers
|
||||
|
||||
let buildStars = func(tokens: seq[ParserToken], org: OrgBlock): OrgBlock {.closure.}=
|
||||
org.level = tokens.len
|
||||
org
|
||||
|
||||
# let buildTodo = func(tokens: seq[ParserToken], org: OrgBlock): OrgBlock {.closure.}=
|
||||
# org.todo = tokens.tokensToString().some()
|
||||
# org
|
||||
|
||||
# let buildHeadlineContent = func(tokens: seq[ParserToken], org: OrgBlock): OrgBlock {.closure.}=
|
||||
# let headlineString = tokens.tokensToString()
|
||||
|
||||
# let tokens = tryBuildInline(headlineString)
|
||||
# .fold(
|
||||
# x => newSeq[OrgInlineBuilderT](),
|
||||
# (x: OrgInlineBuilder) => x.tree,
|
||||
# )
|
||||
|
||||
# org.headlineContent = tokens
|
||||
# org
|
||||
|
||||
# let buildHeadlineChildren = func(tokens: seq[ParserToken], org: OrgBlock): OrgBlock {.closure.}=
|
||||
# # let headlineString = tokens.tokensToString()
|
||||
|
||||
# org
|
||||
|
||||
# let buildHeadlineNewline = func(tokens: seq[ParserToken], org: OrgBlock): OrgBlock {.closure.}=
|
||||
# org
|
||||
|
||||
proc tryBuildHeading(builder: OrgBuilderResult): OrgBuilderResult =
|
||||
# echo builder
|
||||
builder
|
||||
.applyParsersSeqToSingle(
|
||||
OrgBlock(kind: orgHeading),
|
||||
@[
|
||||
(parseHeadingStars, buildStars, false),
|
||||
|
||||
(@[optional(parseTodoKeyword)], buildTodo, true),
|
||||
(@[optional(parseDoneKeyword)], buildTodo, true),
|
||||
|
||||
(parseHeadingText, buildHeadlineContent, false),
|
||||
|
||||
# (parseContentText, buildHeadlineChildren, true),
|
||||
|
||||
(parseHeadlineNewline, buildHeadlineNewline, false),
|
||||
]
|
||||
)
|
||||
|
||||
# let headingParser = choice(@[
|
||||
# newline + ch('*'),
|
||||
# endOfStream,
|
||||
# ])
|
||||
|
||||
|
||||
# let contentEndParser = newline
|
||||
|
||||
# let buildNewline = func(tokens: seq[ParserToken], org: OrgBlock): OrgBlock {.closure.} =
|
||||
# OrgBlock(
|
||||
# kind: orgNewline
|
||||
# )
|
||||
|
||||
# proc concatOrgBlock(xs: seq[OrgBlock], ys: seq[OrgBlock]): seq[OrgBlock] = xs & ys
|
||||
|
||||
# let parseBlockText = @[
|
||||
# anyUntil(choice(@[endOfStream, str("\n")])),
|
||||
# ]
|
||||
|
||||
# let contentParsersSeq: seq[tuple[
|
||||
# parsers: seq[Parser -> ParserResult],
|
||||
# tokenFoldFn: (seq[ParserToken], OrgBlock) -> OrgBlock,
|
||||
# ignoreEmpty: bool,
|
||||
# # concatFn: (seq[OrgBlock], seq[OrgBlock]) -> seq[OrgBlock],
|
||||
# ]] = @[
|
||||
# (parseBlockText, buildNewline, true),
|
||||
# (contentEndParser, buildNewline, true),
|
||||
# ]
|
||||
|
||||
# proc parseHeadlineChildren(builder: OrgBuilder): OrgBuilderResult =
|
||||
# var headingBlock = builder.tree[0]
|
||||
|
||||
# # echo "Builder " & $builder
|
||||
|
||||
# # var builderAcc: OrgBuilderResult = OrgBuilderResult.ok(builder)
|
||||
# var builderAcc: OrgBuilderResult = OrgBuilderResult.ok(OrgBuilder(
|
||||
# (
|
||||
# parser: builder.parser,
|
||||
# tree: @[],
|
||||
# )
|
||||
# ))
|
||||
|
||||
# echo "builderAcc before parse content ", builderAcc
|
||||
# # echo builderAcc.tryParser(headingParser)
|
||||
# while builderAcc.isOk() and builderAcc.tryParser(headingParser).isErr():
|
||||
# builderAcc = builderAcc
|
||||
# .applyParsersSeqToSeq(
|
||||
# contentParsersSeq
|
||||
# )
|
||||
# echo "builderAcc after parse in while loop ", builderAcc
|
||||
# # echo builderAcc
|
||||
|
||||
|
||||
# let content = builderAcc.fold(
|
||||
# (err) => 0, # newSeq[OrgBlock](),
|
||||
# (builder: OrgBuilder) => len builder.tree,
|
||||
# )
|
||||
|
||||
# # echo "builderAcc" & $builderAcc
|
||||
# echo "content" & $content
|
||||
|
||||
# # headingBlock.content = content
|
||||
|
||||
# let res = builderAcc.fold(
|
||||
# (err) => OrgBuilderResult.err((builder, "Could not parse content")),
|
||||
# (builder: OrgBuilder) => OrgBuilderResult.ok((
|
||||
# parser: builder.parser.emptyTokens(),
|
||||
# tree: @[headingBlock],
|
||||
# ))
|
||||
# )
|
||||
|
||||
# res
|
||||
|
||||
# proc makeOrg*(x: string): OrgBuilderResult =
|
||||
# var acc = initOrgBuilder(x)
|
||||
|
||||
# while acc.isOk() and acc.tryParser(endOfStream).isErr():
|
||||
# let unsafeAcc = acc.unsafeGet()
|
||||
|
||||
# let item = acc
|
||||
# .tryBuildHeading()
|
||||
# .flatMap(
|
||||
# (x: OrgBuilder) => parseHeadlineChildren(x)
|
||||
# )
|
||||
# .flatMap(
|
||||
# (x: OrgBuilder) => OrgBuilderResult.ok(OrgBuilder((
|
||||
# parser: x.parser,
|
||||
# tree: unsafeAcc.tree & x.tree,
|
||||
# )))
|
||||
# )
|
||||
|
||||
# acc = item
|
||||
|
||||
# acc
|
||||
|
||||
# proc foldOrg*(x: OrgBuilderResult): string =
|
||||
# x
|
||||
# .fold(
|
||||
# (err) => "Error" & $err,
|
||||
# (builder: OrgBuilder) => $builder.tree,
|
||||
# )
|
||||
# # echo acc.unsafeGet().tree[^1]
|
||||
|
||||
# when isMainModule:
|
||||
# let test1 = """* TODO Level 1"""
|
||||
|
||||
# let test2 = """* TODO Level 1
|
||||
# Foo
|
||||
# ** DONE Level 2
|
||||
|
||||
# Some more content
|
||||
# """
|
||||
|
||||
# let test3 = """* TODO Level 1
|
||||
# Foo
|
||||
# """
|
||||
|
||||
# let acc = makeOrg(test3).foldOrg()
|
||||
|
||||
# discard acc
|
||||
101
src_v2/org/org_builder_inline_text.nim
Normal file
101
src_v2/org/org_builder_inline_text.nim
Normal file
@@ -0,0 +1,101 @@
|
||||
import std/[
|
||||
collections/sequtils,
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
option,
|
||||
resultM,
|
||||
]
|
||||
import ./org_types
|
||||
import ./org_builder_api
|
||||
import ./org_builder_link.nim
|
||||
import ../parser/parser
|
||||
|
||||
# -- Parsers
|
||||
|
||||
let boldParser* = anyBetweenPair(ch('*'))
|
||||
let italicParser* = anyBetweenPair(ch('/'))
|
||||
let underlineParser* = anyBetweenPair(ch('_'))
|
||||
let verbatimParser* = anyBetweenPair(ch('='))
|
||||
let codeParser* = anyBetweenPair(ch('~'))
|
||||
let strikeThroughParser* = anyBetweenPair(ch('+'))
|
||||
|
||||
# -- Tokenizers
|
||||
|
||||
let rawTextTokenizer* = tokenizeRawText(orgRawText)
|
||||
|
||||
let boldTokenizer* = tokenizeInlineTokens(orgBoldText)
|
||||
let italicTokenizer* = tokenizeInlineTokens(orgItalicText)
|
||||
let underlineTokenizer* = tokenizeInlineTokens(orgUnderlineText)
|
||||
let verbatimTokenizer* = tokenizeInlineTokens(orgVerbatimText)
|
||||
let codeTokenizer* = tokenizeInlineTokens(orgCodeText)
|
||||
let strikeThroughTokenizer* = tokenizeInlineTokens(orgStrikeThroughText)
|
||||
|
||||
let styledTextTokenizers* = @[
|
||||
(boldParser, boldTokenizer),
|
||||
(italicParser, italicTokenizer),
|
||||
(underlineParser, underlineTokenizer),
|
||||
(verbatimParser, verbatimTokenizer),
|
||||
(codeParser, codeTokenizer),
|
||||
(strikeThroughParser, strikeThroughTokenizer),
|
||||
(linkParser, linkTokenizer),
|
||||
]
|
||||
|
||||
proc tryTokenizeRawText*(tokens: seq[ParserToken]): seq[OrgInlineBuilderT] =
|
||||
## Merge all parser `tokens` into a string to tokenize for the builder.
|
||||
## Unless the string is empty, in this case return an empty list.
|
||||
let str = tokens.foldl(a & b.toString(), "")
|
||||
if str.len == 0: @[]
|
||||
else: @[rawTextTokenizer(str)]
|
||||
|
||||
proc tryTokenizeInline*(content: string): OrgInlineBuilderResult =
|
||||
initOrgInlineBuilder(content)
|
||||
.flatMap((builder: OrgInlineBuilder) => tryTokenize(
|
||||
builder = builder,
|
||||
builderFns = styledTextTokenizers,
|
||||
defaultTokenizerFn = tryTokenizeRawText,
|
||||
))
|
||||
|
||||
# -- Tests
|
||||
|
||||
when isMainModule:
|
||||
import fp/list
|
||||
|
||||
block testParsers:
|
||||
proc testParser(str: string, parser: parserFnT): string =
|
||||
initParserResult(str).flatMap(parser).tokensToString()
|
||||
|
||||
assert testParser("*bold*", boldParser) == "bold"
|
||||
assert testParser("/italic/", italicParser) == "italic"
|
||||
assert testParser("_underline_", underlineParser) == "underline"
|
||||
assert testParser("=verbatim=", verbatimParser) == "verbatim"
|
||||
assert testParser("~code~", codeParser) == "code"
|
||||
assert testParser("+strikeThrough+", strikeThroughParser) == "strikeThrough"
|
||||
|
||||
block testTokenizers:
|
||||
let testStr = "Regular *bold* [[placeholder.com]] /italic/ _underline_ =verbatim= ~code~ +strikethrough+ [[https://placeholder.com][title]]"
|
||||
let testBuilder = tryTokenizeInline(testStr)
|
||||
|
||||
assert testBuilder.isOk()
|
||||
|
||||
let testTokens = testBuilder.unsafeGet().tree
|
||||
let testTokensEpxected = @[
|
||||
OrgInlineBlock(kind: orgRawText, content: "Regular "),
|
||||
OrgInlineBlock(kind: orgBoldText, content: "bold"),
|
||||
OrgInlineBlock(kind: orgRawText, content: " "),
|
||||
OrgInlineBlock(kind: orgLink, linkUrl: "placeholder.com"),
|
||||
OrgInlineBlock(kind: orgRawText, content: " "),
|
||||
OrgInlineBlock(kind: orgItalicText, content: "italic"),
|
||||
OrgInlineBlock(kind: orgRawText, content: " "),
|
||||
OrgInlineBlock(kind: orgUnderlineText, content: "underline"),
|
||||
OrgInlineBlock(kind: orgRawText, content: " "),
|
||||
OrgInlineBlock(kind: orgVerbatimText, content: "verbatim"),
|
||||
OrgInlineBlock(kind: orgRawText, content: " "),
|
||||
OrgInlineBlock(kind: orgCodeText, content: "code"),
|
||||
OrgInlineBlock(kind: orgRawText, content: " "),
|
||||
OrgInlineBlock(kind: orgStrikeThroughText, content: "strikethrough"),
|
||||
OrgInlineBlock(kind: orgRawText, content: " "),
|
||||
OrgInlineBlock(kind: orgLink, linkDescription: "title".some(), linkUrl: "https://placeholder.com"),
|
||||
]
|
||||
|
||||
assert testTokens.asList() == testTokensEpxected.asList()
|
||||
89
src_v2/org/org_builder_link.nim
Normal file
89
src_v2/org/org_builder_link.nim
Normal file
@@ -0,0 +1,89 @@
|
||||
import std/[
|
||||
collections/sequtils,
|
||||
strformat,
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
option,
|
||||
resultM,
|
||||
]
|
||||
import ./org_types
|
||||
import ./org_builder_api
|
||||
import ../parser/parser
|
||||
import ../utils/fp
|
||||
|
||||
import fusion/matching
|
||||
{.experimental: "caseStmtMacros".}
|
||||
|
||||
# -- Parsers
|
||||
|
||||
let linkStartParser* = following(@[
|
||||
ignore(str("[[")),
|
||||
anyUntil(choice(@[str("]["), str("]]")])),
|
||||
optional(ignore(str("][")))
|
||||
])
|
||||
let linkEndParser* = following(@[
|
||||
anyUntil(str("]]")),
|
||||
ignore(str("]]")),
|
||||
])
|
||||
|
||||
let linkParser* = proc(parser: Parser): ParserResult {.closure.} =
|
||||
# Parse an an org link in the `parser` state.
|
||||
# Return two tokens for a link with description: [[url][description]]
|
||||
# Return one token for a link without description : [[url]]
|
||||
let linkUrl = linkStartParser(parser)
|
||||
.map(flattenTokens)
|
||||
|
||||
let linkValue = linkUrl
|
||||
.map(emptyTokens)
|
||||
.flatMap(linkEndParser)
|
||||
.map(flattenTokens)
|
||||
|
||||
case (linkUrl, linkValue):
|
||||
of (Some(@key), Some(@value)):
|
||||
ParserResult.ok(
|
||||
Parser(
|
||||
state: value.state,
|
||||
tokens: @[
|
||||
key.tokens[0],
|
||||
value.tokens[0],
|
||||
],
|
||||
)
|
||||
)
|
||||
else:
|
||||
linkValue
|
||||
|
||||
# -- Tokenizers
|
||||
|
||||
func linkStringifier*(linkUrl: string, linkDescription: Option[string]): string =
|
||||
case (linkUrl, linkDescription):
|
||||
of (@linkUrl, Some(@linkDescription)):
|
||||
return &"[[{linkUrl}][{linkDescription}]]"
|
||||
of (@linkUrl, None()):
|
||||
return &"[[{linkUrl}]]"
|
||||
|
||||
let linkTokenizer* = func(parserTokens: seq[ParserToken]): seq[OrgInlineBuilderT] {.closure.} =
|
||||
[@linkUrl, @linkDescription] := parserTokens.map(toString)
|
||||
let linkDescriptionOption = linkDescription.some().notEmpty()
|
||||
return @[
|
||||
OrgInlineBuilderT(
|
||||
kind: orgLink,
|
||||
content: linkStringifier(linkUrl, linkDescriptionOption),
|
||||
linkUrl: linkUrl,
|
||||
linkDescription: linkDescriptionOption,
|
||||
)
|
||||
]
|
||||
|
||||
when isMainModule:
|
||||
block testParsers:
|
||||
proc testParser(str: string, parser: parserFnT): string =
|
||||
initParserResult(str).flatMap(parser).tokensToString()
|
||||
|
||||
assert testParser("[[placeholder.com]]", linkParser) == "placeholder.com"
|
||||
assert testParser("[[placeholder.com][ - title]]", linkParser) == "placeholder.com - title"
|
||||
|
||||
# .linkParser()
|
||||
# .foldTokens(
|
||||
# onError = xs => newSeq[OrgInlineBuilderT](),
|
||||
# onSuccess = linkTokenizer,
|
||||
# )
|
||||
138
src_v2/org/org_builder_paragraph.nim
Normal file
138
src_v2/org/org_builder_paragraph.nim
Normal file
@@ -0,0 +1,138 @@
|
||||
import std/[
|
||||
collections/sequtils,
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
option,
|
||||
resultM,
|
||||
]
|
||||
import ./org_types
|
||||
import ./org_builder_api
|
||||
import ./org_builder_link
|
||||
import ./org_builder_inline_text
|
||||
import ../parser/parser
|
||||
import ../utils/fp
|
||||
|
||||
import fusion/matching
|
||||
{.experimental: "caseStmtMacros".}
|
||||
|
||||
# let lineTokenizer = (builder: OrgInlineBuilder) => tryTokenize(
|
||||
# builder = builder,
|
||||
# builderFns = styledTextTokenizers,
|
||||
# defaultTokenizerFn = tryTokenizeRawText,
|
||||
# )
|
||||
|
||||
# OrgDocucment
|
||||
|
||||
# -> Seq [Builder -> Builder]
|
||||
|
||||
# One of OrgBlock
|
||||
|
||||
# @[
|
||||
|
||||
# OrgBuilderResult -> OrgBuilderResult
|
||||
# ]
|
||||
# with default OrgBuilder -> OrgBuilder
|
||||
# until Eol
|
||||
|
||||
let lineParser = anyUntil(newlineOrEol)
|
||||
|
||||
let listTypesParser = choice(@[
|
||||
ch('-'),
|
||||
ch('+'),
|
||||
choice(@[letter, digit]) + choice(@[ch('.'), ch(')')]),
|
||||
])
|
||||
|
||||
let listStartParser = manyUntil(space, listTypesParser)
|
||||
let listTypeParser = listTypesParser + ignore(space)
|
||||
let listContentParser = anyUntil(newlineOrEol) + ignore(newlineOrEol)
|
||||
|
||||
let buildListItem = proc(builder: OrgBuilder): OrgBuilderResult {.closure.} =
|
||||
let (parser, tree) = builder
|
||||
|
||||
let beforeSpaces = ParserResult.ok(parser)
|
||||
.flatMap(listStartParser)
|
||||
.map(flattenTokens)
|
||||
|
||||
let listType = beforeSpaces
|
||||
.map(emptyTokens)
|
||||
.flatMap(listTypeParser)
|
||||
.map(flattenTokens)
|
||||
|
||||
let listContent = listType
|
||||
.map(emptyTokens)
|
||||
.flatMap(listContentParser)
|
||||
.map(flattenTokens)
|
||||
|
||||
case (beforeSpaces, listType, listContent):
|
||||
of (Some(@space), Some(@symbol), Some(@content)):
|
||||
let treeResult = tree &
|
||||
OrgBlock(
|
||||
kind: orgList,
|
||||
listIndentation: 1,
|
||||
listBulletType: '-',
|
||||
listContent: tryTokenizeInline(content.tokens.tokensToString()).unsafeGet().tree
|
||||
)
|
||||
|
||||
OrgBuilderResult.ok(OrgBuilder((
|
||||
parser: content,
|
||||
tree: treeResult,
|
||||
)))
|
||||
else: OrgBuilderResult.err(OrgBuilderError(
|
||||
kind: parserError,
|
||||
parser: listContent,
|
||||
tree: tree,
|
||||
))
|
||||
|
||||
proc buildParagraph*(
|
||||
builder: OrgBuilder,
|
||||
builderFns: seq[proc (builder: OrgBuilder): OrgBuilderResult],
|
||||
stopAtParserFn = endOfStream,
|
||||
): OrgBuilderResult =
|
||||
|
||||
var builderAcc: OrgBuilderResult = OrgBuilderResult.ok(builder)
|
||||
|
||||
while builderAcc.isOk() and builderAcc.tryParserResult(stopAtParserFn).isErr():
|
||||
var found = false
|
||||
|
||||
for builderFn in builderFns:
|
||||
# let (builderFn) = fn
|
||||
let builderResult: OrgBuilderResult = builderAcc.flatMap(builderFn)
|
||||
|
||||
if builderResult.isOk():
|
||||
found = true
|
||||
builderAcc = builderResult
|
||||
break
|
||||
|
||||
if not found:
|
||||
builderAcc
|
||||
.flatMap((builder: OrgBuilder) => tryTokenize(
|
||||
builder = builder,
|
||||
builderFns = styledTextTokenizers,
|
||||
defaultTokenizerFn = tryTokenizeRawText,
|
||||
))
|
||||
.fold(
|
||||
OrgInlineBuilder
|
||||
)
|
||||
|
||||
|
||||
builderAcc = OrgBuilderResult.err(OrgBuilderError(
|
||||
kind: builderError,
|
||||
parser: builderAcc.fold(
|
||||
(err: OrgBuilderError) => err.parser,
|
||||
(builder: OrgBuilder) => ParserResult.ok(builder.parser),
|
||||
),
|
||||
tree: builder.tree,
|
||||
))
|
||||
break
|
||||
|
||||
builderAcc
|
||||
|
||||
let paragraphBuilders: seq[proc (builder: OrgBuilder): OrgBuilderResult] = @[buildListItem]
|
||||
|
||||
when isMainModule:
|
||||
block testParsers:
|
||||
let test = initOrgBuilder("""- List item
|
||||
1. List item
|
||||
random stuff""").flatMap((builder: OrgBuilder) => builder.buildParagraph(paragraphBuilders))
|
||||
echo test
|
||||
0
src_v2/org/org_text_delimiter.nim
Normal file
0
src_v2/org/org_text_delimiter.nim
Normal file
249
src_v2/org/org_types.nim
Normal file
249
src_v2/org/org_types.nim
Normal file
@@ -0,0 +1,249 @@
|
||||
import std/[
|
||||
sequtils,
|
||||
strutils,
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
option,
|
||||
]
|
||||
import fusion/matching
|
||||
import ../utils/printers
|
||||
|
||||
{.experimental: "caseStmtMacros".}
|
||||
|
||||
# -- OrgInlineBlock.Type
|
||||
|
||||
type
|
||||
orgInlineBlockKind* = enum
|
||||
orgRawText,
|
||||
orgText,
|
||||
|
||||
# Formating
|
||||
orgBoldText,
|
||||
orgItalicText,
|
||||
orgUnderlineText,
|
||||
orgVerbatimText,
|
||||
orgCodeText,
|
||||
orgStrikeThroughText,
|
||||
|
||||
# Links
|
||||
orgLink,
|
||||
|
||||
OrgInlineBlock* = ref object
|
||||
children*: seq[OrgInlineBlock]
|
||||
content*: string
|
||||
|
||||
case kind*: orgInlineBlockKind
|
||||
of orgRawText: discard
|
||||
of orgText: discard
|
||||
|
||||
# Formating
|
||||
of orgBoldText: discard
|
||||
of orgItalicText: discard
|
||||
of orgUnderlineText: discard
|
||||
of orgVerbatimText: discard
|
||||
of orgCodeText: discard
|
||||
of orgStrikeThroughText: discard
|
||||
|
||||
# Links
|
||||
of orgLink:
|
||||
linkUrl*: string
|
||||
linkDescription*: Option[string]
|
||||
|
||||
|
||||
# OrgInlineBlock.Equals
|
||||
|
||||
func `==`*(a: OrgInlineBlock, b: OrgInlineBlock): bool =
|
||||
## Compare two `OrgInlineBlock` objects.
|
||||
## TODO Compare children
|
||||
|
||||
if (a.kind != b.kind): return false
|
||||
|
||||
case ((a.kind, b.kind)):
|
||||
of((orgLink, orgLink)):
|
||||
a.linkUrl == b.linkUrl and a.linkDescription == b.linkDescription
|
||||
|
||||
else: a.content == b.content
|
||||
|
||||
# -- OrgInlineBlock.PrettyPrinters
|
||||
|
||||
proc `$`*(x: orgInlineBlockKind): string =
|
||||
case x:
|
||||
of orgRawText: "Text (Raw)"
|
||||
of orgText: "Text"
|
||||
|
||||
# Formating
|
||||
of orgBoldText: "Text (Bold)"
|
||||
of orgItalicText: "Text (Italic)"
|
||||
of orgUnderlineText: "Text (Underline)"
|
||||
of orgVerbatimText: "Text (Verbatim)"
|
||||
of orgCodeText: "Text (Code)"
|
||||
of orgStrikeThroughText: "Text (StrikeThrough)"
|
||||
|
||||
# Links
|
||||
of orgLink: "Link"
|
||||
|
||||
const PPRINT_INDENT_SIZE* = 2
|
||||
|
||||
func pprint*(x: OrgInlineBlock, indent = 0): string =
|
||||
let specialFields = case x.kind:
|
||||
of orgLink:
|
||||
@[
|
||||
("linkUrl", $x.linkUrl, true),
|
||||
("linkDescription", x.linkDescription.getOrElse(""), x.linkDescription.isSome()),
|
||||
]
|
||||
else: @[]
|
||||
|
||||
let fields = @[
|
||||
("kind", $x.kind, true),
|
||||
("content", $x.content, x.content.len != 0),
|
||||
]
|
||||
.concat(specialFields)
|
||||
.stringifyFields()
|
||||
|
||||
stringifyBlock(
|
||||
"OrgInlineBlock",
|
||||
indent,
|
||||
fields,
|
||||
)
|
||||
|
||||
func `$`*(x: OrgInlineBlock): string = pprint(x)  # default stringify delegates to pprint

func pprint*(xs: seq[OrgInlineBlock], indent = 0): string =
  ## Pretty-print a sequence of inline blocks via `stringifySeq`.
  stringifySeq(xs, (x: OrgInlineBlock) => pprint(x, indent), indent)

func `$`*(xs: seq[OrgInlineBlock]): string = pprint(xs)
|
||||
|
||||
## -- OrgBlock.Type
|
||||
|
||||
type
  orgBlockKind* = enum
    orgHeading
    orgParagraph
    orgList
    orgNewline
  OrgBlock* = ref object
    ## A block-level Org element; the payload fields depend on `kind`.
    children*: seq[OrgInlineBlock]

    case kind*: orgBlockKind
    of orgHeading:
      level*: int                            # heading depth
      todo*: Option[string]                  # optional TODO keyword
      headlineContent*: seq[OrgInlineBlock]  # inline content of the headline
      content*: seq[OrgBlock]                # nested blocks under this heading
    of orgParagraph:
      # FIX: exported for consistency — every other variant field
      # (listContent, headlineContent, …) is public.
      paragraphContent*: seq[OrgInlineBlock]
    of orgList:
      listIndentation*: int
      listBulletType*: char
      listContent*: seq[OrgInlineBlock]
    of orgNewline: discard
|
||||
|
||||
## -- OrgBlock.PrettyPrinters
|
||||
|
||||
func pprint*(x: OrgBlock, indent = 0): string =
  ## Pretty-print a block as an indented pseudo-constructor string.
  ## Each entry is `(name, renderedValue, shouldRender)`; the boolean
  ## suppresses empty/absent fields.
  let fields = case x.kind:
    of orgHeading:
      @[
        ("kind", $x.kind, true),
        ("level", $x.level, true),
        ("todo", $x.todo, x.todo.isSome()),
        ("headlineContent", $x.headlineContent, x.headlineContent.len != 0),
        ("children", $x.children, x.children.len != 0),
        (
          "content",
          # Nested blocks are rendered recursively and re-indented so they
          # line up under this heading's own indentation.
          ("@[\n" &
            x.content.map((x) => pprint(x)).join(",\n").indent(PPRINT_INDENT_SIZE) &
            ",\n],"
          ).indent(indent),
          x.content.len != 0
        ),
      ]
    of orgParagraph:
      @[
        ("kind", $x.kind, true),
        ("paragraphContent", $x.paragraphContent, x.paragraphContent.len != 0),
      ]
    of orgList:
      @[
        ("kind", $x.kind, true),
        ("listContent", $x.listContent, x.listContent.len != 0),
        ("listIndentation", $x.listIndentation, true),
        ("listBulletType", $x.listBulletType, true),
      ]
    else:
      # orgNewline (and any future payload-less kind): only the kind.
      @[
        ("kind", $x.kind, true),
      ]

  stringifyBlock(
    "OrgBlock",
    indent,
    fields.stringifyFields(),
  )
|
||||
|
||||
func `$`*(x: OrgBlock): string = pprint(x)  # default stringify delegates to pprint

func pprint*(xs: seq[OrgBlock], indent = 0): string =
  ## Pretty-print a sequence of blocks via `stringifySeq`.
  stringifySeq(xs, (x: OrgBlock) => pprint(x, indent), indent)

func `$`*(xs: seq[OrgBlock]): string = pprint(xs)
|
||||
|
||||
# -- OrgDocument.Type
|
||||
|
||||
type
  OrgDocument* = ref object
    ## Root of a parsed Org file: a flat list of top-level blocks.
    children*: seq[OrgBlock]

# -- OrgDocument.PrettyPrinters

func pprint*(x: OrgDocument, indent = 0): string =
  ## Pretty-print the document and all of its children.
  let fields = @[
    ("children", $x.children, true),
  ]
    .stringifyFields()

  stringifyBlock(
    "OrgDocument",
    indent,
    fields,
  )

func `$`*(xs: OrgDocument): string = pprint(xs)
|
||||
|
||||
when isMainModule:
  # Self-tests: run only when this module is compiled as the main program.

  block testOrgInlineBlockEquals:
    assert OrgInlineBlock(kind: orgRawText) == OrgInlineBlock(kind: orgRawText)
    assert OrgInlineBlock(kind: orgRawText, content: "") == OrgInlineBlock(kind: orgRawText, content: "")
    assert OrgInlineBlock(kind: orgRawText, content: "a") != OrgInlineBlock(kind: orgRawText, content: "b")
    assert OrgInlineBlock(kind: orgRawText) != OrgInlineBlock(kind: orgBoldText)
    # Link
    assert OrgInlineBlock(kind: orgLink, linkDescription: "desc".some(), linkUrl: "url") == OrgInlineBlock(kind: orgLink, linkDescription: "desc".some(), linkUrl: "url")
    assert OrgInlineBlock(kind: orgLink, linkUrl: "url") != OrgInlineBlock(kind: orgLink, linkDescription: "desc".some(), linkUrl: "url")

  # Manual pretty-printing demo kept for reference (not executed).
  # let doc = OrgDocument(
  #   children: @[
  #     OrgBlock(
  #       kind: orgHeading,
  #       children: @[
  #         OrgInlineBlock(
  #           kind: orgLink,
  #           linkUrl: "https://placeholder.com",
  #           linkDescription: "Placeholder".some(),
  #         ),
  #       ],
  #       content: @[
  #         OrgBlock(
  #           kind: orgNewline,
  #         ),
  #         OrgBlock(
  #           kind: orgNewline,
  #         )
  #       ],
  #     ),
  #     OrgBlock(kind: orgHeading, level: 1),
  #     OrgBlock(kind: orgHeading),
  #   ]
  # )
  # echo doc
|
||||
3
src_v2/org/test.nim
Normal file
3
src_v2/org/test.nim
Normal file
@@ -0,0 +1,3 @@
|
||||
# Scratch script exercising the experimental std diff module.
# NOTE(review): `experimental/diff` availability depends on the Nim
# version — confirm against the project toolchain.
import experimental/diff

echo diff(@[1,2,3], @[1,2,3])
|
||||
119
src_v2/parser/builder_api.nim
Normal file
119
src_v2/parser/builder_api.nim
Normal file
@@ -0,0 +1,119 @@
|
||||
import std/[
|
||||
collections/sequtils,
|
||||
options,
|
||||
strformat,
|
||||
strutils,
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
resultM
|
||||
]
|
||||
import ./builder_types
|
||||
import ./parser_types
|
||||
import ./parser_api
|
||||
|
||||
# -- Builder API
|
||||
|
||||
proc tryTokenize*[T](
  builder: Builder[T],
  builderFns: seq[tuple[
    parserFn: parserFnT,
    tokenizerFn: seq[ParserToken] -> seq[T],
  ]],
  defaultTokenizerFn: seq[ParserToken] -> seq[T],
  stopAtParserFn = newlineOrEol,
  concatTokensFn = concat[T],
): BuilderResult[T] =
  ## Try to tokenize text in `builder` by checking the `builderFns` seq for a
  ## successful `parserFn`.
  ## When a `parserFn` succeeds, tokenize the text in the `Parser` using its
  ## paired `tokenizerFn` and merge the result into `builder.tree` using
  ## `concatTokensFn`.
  ## When no parser matches, consume one character and keep the unmatched run
  ## for the `defaultTokenizerFn`, until `stopAtParserFn` is matched.

  # NOTE(review): `tree` from this destructuring is unused — the tree is
  # carried through `builderAcc` instead.
  let (parser, tree) = builder
  # Mutating accumulators
  var parserAcc: ParserResult = ParserResult.ok(parser)
  var builderAcc: Builder[T] = builder

  while parserAcc.isOk() and parserAcc.flatMap(stopAtParserFn).isErr():
    # Empty the parser tokens as we want to separate them for the next parser
    # in the sequence.
    let emptyParser = parserAcc.map(emptyTokens)

    # Find a matching parser and convert its tokens with its `tokenizerFn`.
    var found = false
    for fn in builderFns:
      let (parserFn, tokenizerFn) = fn

      let parseResult = emptyParser.flatMap(parserFn)
      if parseResult.isOk():
        found = true

        # Convert all previous unmatched tokens via the `defaultTokenizerFn`.
        let defaultBuilderTokens = parserAcc
          .foldTokens(
            onErrorFn = _ => newSeq[T](),
            onSuccessFn = defaultTokenizerFn,
          )

        let okParser = parseResult.unsafeGet()
        parserAcc = parseResult.map(emptyTokens)
        # Merge: existing tree ++ pending default tokens ++ matched tokens.
        builderAcc = initBuilder(
          okParser,
          concatTokensFn(
            builderAcc.tree,
            defaultBuilderTokens,
            tokenizerFn(okParser.tokens),
          )
        )
        break

    if not found:
      # No parser matched here: consume one character; it accumulates for
      # the default tokenizer.
      parserAcc = parserAcc.flatMap(anyCh)

  # Flush whatever the default tokenizer still holds after the stop condition.
  let defaultBuilderTokens = parserAcc
    .foldTokens(
      onErrorFn = _ => newSeq[T](),
      onSuccessFn = defaultTokenizerFn,
    )

  BuilderResult[T].ok(initBuilder(
    builderAcc.parser,
    concatTokensFn(
      builderAcc.tree,
      defaultBuilderTokens,
    ),
  ))
|
||||
|
||||
# -- Stringifiers
|
||||
|
||||
# NOTE(review): stub stringifier — prints no field data, the generic `T`
# and parameter `x` are unused, and the type is `Builder` rather than
# `Builder[T]`. TODO flesh out or remove.
func pprint[T](x: Builder): string =
  &"""Builder()
"""
|
||||
|
||||
when isMainModule:
  # Self-tests: a string-tree builder specialization.
  type TestStringBuilderT* = string
  type TestStringBuilder* = Builder[TestStringBuilderT]
  type TestStringBuilderResult* = BuilderResult[TestStringBuilderT]

  block testApi:
    # Matches " ( … ) " including the surrounding spaces as delimiters.
    let testParensParser = anyBetween(str(" ("), str(") "))

    proc testDefaultTokenizer(tokens: seq[ParserToken]): seq[string] {.closure.} =
      @[tokens.toString()]
    proc testParensTokenizer(tokens: seq[ParserToken]): seq[string] {.closure.} =
      @[&"Parens({tokens.toString()})"]

    # Second line of input is past the default `newlineOrEol` stop, so it
    # must be ignored by tryTokenize.
    let testBuilder = initBuilder(
      initParser("""sentence (with parens) and more
And ignore this part"""),
      newSeq[TestStringBuilderT]()
    )
    .tryTokenize(
      builderFns = @[(
        testParensParser,
        testParensTokenizer
      )],
      defaultTokenizerFn = testDefaultTokenizer,
    )

    assert testBuilder.isOk() == true
    assert testBuilder.unsafeGet().tree == @["sentence", "Parens(with parens)", "and more"]
|
||||
96
src_v2/parser/builder_types.nim
Normal file
96
src_v2/parser/builder_types.nim
Normal file
@@ -0,0 +1,96 @@
|
||||
import std/[
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
resultM,
|
||||
]
|
||||
import ./parser_types
|
||||
import ./parser_api
|
||||
|
||||
type
  Builder*[T] = tuple[
    parser: Parser,   # current parse position / pending tokens
    tree: seq[T]      # tokens accumulated so far
  ]
  builderErrorKind* = enum
    parserError   # the underlying parser failed
    builderError  # a builder-level failure
  BuilderError*[T] = ref object
    kind*: builderErrorKind
    parser*: ParserResult  # the failing parser result, for diagnostics
    tree*: seq[T]          # tree state at the time of failure
  BuilderResult*[T] = Result[Builder[T], BuilderError[T]]
|
||||
|
||||
# -- Initalizers
|
||||
|
||||
proc initBuilder*[T](parser: Parser, tree: seq[T]): Builder[T] =
  ## Construct a `Builder` pairing a `parser` with an accumulated `tree`.
  (parser: parser, tree: tree)
|
||||
|
||||
# -- Modifiers
|
||||
|
||||
proc setParser*[T](builder: Builder[T], parser: Parser): Builder[T] =
  ## Copy of `builder` with its parser replaced.
  (parser: parser, tree: builder.tree)

proc setTree*[T](builder: Builder[T], tree: seq[T]): Builder[T] =
  ## Copy of `builder` with its tree replaced.
  (parser: builder.parser, tree: tree)
|
||||
|
||||
proc tryParser*[T](
  builder: Builder[T],
  parserFn: parserFnT,
): BuilderResult[T] =
  ## Try out a `parserFn` on a `builder`.
  ## When successful return the ORIGINAL builder unchanged; otherwise wrap
  ## the parse failure in a `BuilderError` of kind `parserError`.
  # Probe with a fresh token list so the trial does not pollute tokens.
  let parser = ParserResult
    .ok(Parser(
      state: builder.parser.state,
      tokens: @[],
    ))
    .flatMap(parserFn)

  parser
    .foldTokens(
      (err: ParserError) => BuilderResult[T].err(BuilderError[T](
        kind: parserError,
        parser: parser,
        tree: builder.tree,
      )),
      # Tokens from the probe are deliberately discarded.
      (newTokens: seq[ParserToken]) => BuilderResult[T].ok(builder),
    )
|
||||
proc tryParserResult*[T](
  builder: BuilderResult[T],
  parser: Parser -> ParserResult,
): BuilderResult[T] =
  ## Try out a `parser` on a `BuilderResult`.
  ## When successful return the ok builder, otherwise return an error;
  ## an already-failed `builder` passes through untouched.
  builder.flatMap((x: Builder[T]) => tryParser(x, parser))
|
||||
|
||||
when isMainModule:
  # Self-tests: builder modifiers on a string-tree specialization.
  type TestStringBuilderT* = string
  type TestStringBuilder* = Builder[TestStringBuilderT]
  type TestStringBuilderResult* = BuilderResult[TestStringBuilderT]

  let testBuilder123 = initBuilder(
    initParser("123"),
    newSeq[TestStringBuilderT]()
  )

  block testModifiers:
    # setters
    assert testBuilder123.setParser(initParser("abc")).parser.state.stream == "abc"
    assert testBuilder123.setTree(@["abc"]).tree[0] == "abc"

    # tryParser
    assert testBuilder123.tryParser(ch('1')).isOk() == true
    assert testBuilder123.tryParser(ch('2')).isErr() == true

  # echo testBuilder123
  # block testModifiers:
|
||||
9
src_v2/parser/parser.nim
Normal file
9
src_v2/parser/parser.nim
Normal file
@@ -0,0 +1,9 @@
|
||||
## Facade module: importing `parser` brings in the whole parser/builder
## stack in one go.
import parser_types
import parser_api
import builder_types
import builder_api

export parser_types
export parser_api
export builder_types
export builder_api
|
||||
401
src_v2/parser/parser_api.nim
Normal file
401
src_v2/parser/parser_api.nim
Normal file
@@ -0,0 +1,401 @@
|
||||
import std/[
|
||||
options,
|
||||
strutils,
|
||||
strformat,
|
||||
collections/sequtils,
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
maybe,
|
||||
resultM,
|
||||
]
|
||||
import ../utils/str
|
||||
import ./parser_types
|
||||
|
||||
# -- Utilities
|
||||
|
||||
proc isStreamCompleted*(parser: Parser): bool =
  ## True once the parser's position has reached (or passed) the last
  ## character of its stream.
  let state = parser.state
  state.position >= state.stream.len - 1

proc isStreamCompleted*(parserResult: ParserResult): bool =
  ## True when an ok result has consumed its whole stream; error results
  ## always count as not completed.
  if parserResult.isOk():
    parserResult.unsafeGet().isStreamCompleted()
  else:
    false
|
||||
|
||||
# -- Parsing functions
|
||||
|
||||
proc ch*(expectedChars: set[char]): parserFnT {.inline.} =
  ## Create a parser function accepting any character in `expectedChars`.
  ## On match the character is appended to the tokens and the position
  ## advances by one; otherwise a `ParserError` describes the mismatch.
  return proc(parser: Parser): ParserResult =
    let state = parser.state
    let newIndex = state.position + 1

    if newIndex > (state.stream.len - 1):
      # Nothing left to read.
      return err(ParserError(
        kind: endOfStringErr,
        expected: expectedChars.prettyExpectedSet(),
        index: newIndex,
        parser: parser,
      ))
    else:
      let foundChar = state.stream[newIndex]
      if foundChar in expectedChars:
        return Parser(
          state: ParserState(
            stream: state.stream,
            position: newIndex,
            lastPosition: parser.state.position,
          ),
          tokens: parser.tokens & initParserToken(foundChar)
        ).ok()
      else:
        return err(ParserError(
          kind: charMismatchErr,
          unexpected: $foundChar,
          expected: expectedChars.prettyExpectedSet(),
          index: newIndex,
          parser: parser,
        ))
|
||||
|
||||
proc ch*(expectedChar: char): parserFnT {.inline.} =
  ## Create a parser function accepting exactly `expectedChar`.
  ## NOTE(review): near-duplicate of the `set[char]` overload; kept separate
  ## because the error's `expected` text differs (single char vs pretty set).
  return proc(parser: Parser): ParserResult =
    let state = parser.state
    let newIndex = state.position + 1

    if newIndex > (state.stream.len - 1):
      # Nothing left to read.
      return err(ParserError(
        kind: endOfStringErr,
        expected: &"{expectedChar}",
        index: newIndex,
        parser: parser,
      ))
    else:
      let foundChar = state.stream[newIndex]
      if expectedChar == foundChar:
        return Parser(
          state: ParserState(
            stream: state.stream,
            position: newIndex,
            lastPosition: parser.state.position,
          ),
          tokens: parser.tokens & initParserToken(foundChar)
        ).ok()
      else:
        return err(ParserError(
          kind: charMismatchErr,
          unexpected: &"{foundChar}",
          expected: &"{expectedChar}",
          index: newIndex,
          parser: parser,
        ))
|
||||
|
||||
# Single-character parser aliases built on `ch` with strutils char sets.
let anyCh* = ch(AllChars)
let digit* = ch(Digits)
let letter* = ch(Letters)
let space* = ch(' ')
let whitespace* = ch(Whitespace)
let newline* = ch(Newlines)
|
||||
|
||||
proc str*(expectedString: string): parserFnT {.inline.} =
  ## Parser function that matches `expectedString` one character at a
  ## time, stopping (with that character's error) at the first mismatch.
  return proc(parser: Parser): ParserResult =
    result = parser.ok()
    for expectedChar in expectedString:
      if result.isErr(): return
      result = result.flatMap(ch(expectedChar))
|
||||
|
||||
proc startOfStream*(parser: Parser): ParserResult =
  ## Succeed when the parser is at the start of its stream (position 0, or
  ## the initial -1 before anything was consumed); otherwise error with
  ## `startOfStringErr`. (Original doc comment wrongly described the
  ## end-of-stream check — the code checks the start.)
  let position = parser.state.position

  if position == 0 or position == -1:
    ok(parser)
  else:
    err(ParserError(
      kind: startOfStringErr,
      expected: "startOfStream",
      index: position,
      parser: parser,
    ))
|
||||
|
||||
proc endOfStream*(parser: Parser): ParserResult =
  ## Succeed when the NEXT position would be exactly one past the stream
  ## (i.e. everything has been consumed). Errors when already past the end
  ## (`endOfStringErr`) or when a character remains (`charMismatchErr`).
  let state = parser.state
  let newIndex = state.position + 1

  if newIndex == parser.state.stream.len:
    ok(parser)
  elif newIndex > parser.state.stream.len:
    return err(ParserError(
      kind: endOfStringErr,
      expected: "endOfStream",
      index: newIndex,
      parser: parser,
    ))
  else:
    let foundChar = state.stream[newIndex]
    err(ParserError(
      kind: charMismatchErr,
      unexpected: $foundChar,
      expected: "endOfStream",
      index: newIndex,
      parser: parser,
    ))
|
||||
|
||||
# -- Parsing API
|
||||
|
||||
proc plus*(parserFnA: parserFnT, parserFnB: parserFnT): parserFnT {.inline.} =
  ## Sequence two parser functions: run `parserFnA`, then `parserFnB` on
  ## its success. The first failure short-circuits via `flatMap`.
  return proc(parser: Parser): ParserResult =
    parserFnA(parser).flatMap(parserFnB)

proc `+`*(parserFnA: parserFnT, parserFnB: parserFnT): parserFnT {.inline.} =
  ## Infix alias of `plus`. Delegates instead of duplicating the closure
  ## body so the two can never drift apart.
  plus(parserFnA, parserFnB)
|
||||
|
||||
proc optional*(parserFn: parserFnT): parserFnT {.inline.} =
  ## Wrap `parserFn` so its failure is not fatal: on success the parsed
  ## result is kept, on failure the original parser is returned untouched.
  return proc(parser: Parser): ParserResult =
    let attempt = parserFn(parser)
    if attempt.isErr(): parser.ok()
    else: attempt
|
||||
|
||||
proc ignore*(parserFn: parserFnT): parserFnT {.inline.} =
  ## Run `parserFn` but discard the tokens it produced: on success the
  ## position advances while the token list is reset to the pre-parse one.
  return proc(parser: Parser): ParserResult =
    return parserFn(parser)
      .map((x: Parser) => Parser(
        state: x.state,
        tokens: parser.tokens,
      ))
|
||||
|
||||
proc peek*(amount: int, parserFn: parserFnT): parserFnT {.inline.} =
  ## Look ahead (or behind, for negative `amount`) by running `parserFn`
  ## on a throwaway parser positioned at `position + amount`. On success
  ## the ORIGINAL parser is returned: no tokens captured, position unmoved.
  return proc(parser: Parser): ParserResult =
    let state = parser.state
    let newIndex = state.position + amount

    let newParser = initParser(
      stream = state.stream,
      tokens = parser.tokens,
      position = newIndex,
      lastPosition = state.position,
    )

    # Discard whatever the probe parsed; only its ok/err status matters.
    parserFn(newParser)
      .map(p => parser)

proc peek1*(parserFn: parserFnT): parserFnT = peek(1, parserFn)          # next char
proc peekCurrent*(parserFn: parserFnT): parserFnT = peek(0, parserFn)    # current char
proc peekBack1*(parserFn: parserFnT): parserFnT = peek(-1, parserFn)     # previous char
|
||||
|
||||
proc manyUntil*(acceptFn: parserFnT, stopFn: parserFnT): parserFnT {.inline.} =
  ## Repeatedly apply `acceptFn` until `stopFn` matches or a parse error
  ## occurs. `stopFn` is checked as a probe — its result is not consumed.
  return proc(parser: Parser): ParserResult =
    var res: ParserResult = parser.ok()
    while res.isOk() and res.flatMap(stopFn).isErr():
      res = res.flatMap(acceptFn)
    return res

proc anyUntil*(stopFn: parserFnT): parserFnT {.inline.} =
  ## Parse any character until `stopFn` matches or a parse error occurs.
  ## NOTE(review): despite the original comment, ZERO matches also succeed —
  ## the loop exits immediately when `stopFn` matches at the start.
  manyUntil(anyCh, stopFn)
|
||||
|
||||
proc choice*(parserFns: seq[parserFnT]): parserFnT {.inline} =
  ## Create a parser function that tries each of `parserFns` in order on
  ## the SAME input; the first success wins. When none match, all collected
  ## errors are summarized into a single `choiceMismatchErr`.
  return proc(parser: Parser): ParserResult {.closure.} =
    var errors: seq[ParserResult] = newSeq[ParserResult]()
    var found = Nothing[ParserResult]()

    for fn in parserFns:
      let fnResult: ParserResult = fn(parser)

      if fnResult.isOk():
        found = fnResult.just
        break
      else:
        errors = errors & fnResult

    return found
      .fold(
        # No alternative matched: aggregate every `expected` description.
        proc(): ParserResult =
          let prettyErrors = errors.map((x: ParserResult) => x.error().expected)
          err(ParserError(
            kind: choiceMismatchErr,
            index: parser.state.position + 1,
            expected: &"Choice ({prettyErrors})",
            unexpected: errors[0].error().unexpected,
            parser: parser,
          )),
        proc(x: ParserResult): ParserResult = x,
      )
|
||||
|
||||
proc following*(parserFns: seq[parserFnT]): parserFnT {.inline.} =
  ## Run `parserFns` in sequence; all of them must succeed
  ## (the chain short-circuits on the first error via `flatMap`).
  return proc(parser: Parser): ParserResult {.closure.} =
    parserFns.foldl(a.flatMap(b), parser.ok)
|
||||
|
||||
proc between*(startParserFn: parserFnT, stopParserFn: parserFnT): parserFnT -> parserFnT {.inline.} =
  ## Curried combinator: match a `parserFn` between `startParserFn` and
  ## `stopParserFn`; the delimiters are parsed but kept out of the tokens.
  ## Example:
  ##   between(ch('('), ch(')'))(str("abc")) => Matches (abc)
  return proc(parserFn: parserFnT): parserFnT {.closure.} =
    ignore(startParserFn) + parserFn + ignore(stopParserFn)

let betweenPair* = proc(delimiterParserFn: parserFnT): parserFnT -> parserFnT {.inline.} =
  ## `between` with the same delimiter on both sides (tokens exclude it).
  ## Example:
  ##   betweenPair(ch('"')(str("abc"))) => Matches "abc"
  return proc(parserFn: parserFnT): parserFnT {.closure.} =
    ignore(delimiterParserFn) + parserFn + ignore(delimiterParserFn)
|
||||
|
||||
proc anyBetween*(startParserFn: parserFnT, stopParserFn: parserFnT): parserFnT {.inline.} =
  ## Match ANY characters between `startParserFn` and `stopParserFn`;
  ## the delimiters are kept out of the tokens.
  ## Example:
  ##   anyBetween(ch('('), ch(')')) => Matches (abc)
  between(startParserFn, stopParserFn)(anyUntil(stopParserFn))

let anyBetweenPair* = proc(parserFn: parserFnT): parserFnT {.closure.} =
  ## `anyBetween` with the same delimiter on both sides.
  ## Example:
  ##   anyBetweenPair(ch('"')) => Matches "abc"
  anyBetween(parserFn, parserFn)
|
||||
|
||||
# -- Parsing Aliases
|
||||
|
||||
# Aliases: "newline or end-of-stream" and "whitespace or end-of-stream",
# each normalizing the error's `expected` field to a stable label that
# callers (and tests) can compare against.
const newlineEolExpectedErr = "NewlineEol"
let newlineOrEolParser = choice(@[
  ch(NewLines),
  endOfStream,
])
proc newlineOrEol*(parser: Parser): ParserResult =
  ## Match a newline character or the end of the stream.
  newlineOrEolParser(parser)
    .mapErr((x: ParserError) => x.setErrorExpectedField(newlineEolExpectedErr))

const whitespaceEolExpectedErr = "WhitespaceEol"
let whitespaceOrEolParser = choice(@[
  ch(Whitespace),
  newlineOrEolParser,
])
proc whitespaceOrEol*(parser: Parser): ParserResult =
  ## Match any whitespace character, a newline, or the end of the stream.
  whitespaceOrEolParser(parser)
    .mapErr((x: ParserError) => x.setErrorExpectedField(whitespaceEolExpectedErr))
|
||||
|
||||
# -- Tests
|
||||
|
||||
when isMainModule:
  # Self-tests for the parsing API; run only when compiled as main.
  let testParser123 = initParserResult("123")
  let testAbc1Parser = initParserResult("abc1")

  block testParsingFunctions:
    let ch1 = ch('1')

    # Success
    assert testParser123.flatMap(ch1).tokensToString() == "1"
    assert testParser123.flatMap(anyCh).tokensToString() == "1"
    assert testParser123.flatMap(str("123")).tokensToString() == "123"

    # Mismatch
    assert testParser123.flatMap(ch('2')).error().kind == charMismatchErr
    assert testParser123.flatMap(ch(Letters)).error().kind == charMismatchErr
    assert testParser123.flatMap(str("1234")).error().kind == endOfStringErr
    assert testParser123.flatMap(str("456")).error().kind == charMismatchErr

    # Out of bounds
    # assert initParserResult("").flatMap(ch1).error().kind == endOfStringErr
    assert initParserResult("1").flatMap(ch1).flatMap(ch1).error().kind == endOfStringErr

    # Stream end reached
    assert initParserResult("1").flatMap(ch1).isStreamCompleted() == true
    assert initParserResult("12").flatMap(ch1).isStreamCompleted() == false
    assert initParserResult("").flatMap(ch1).isStreamCompleted() == false
    assert testParser123.flatMap(str("123")).isStreamCompleted() == true

    # endOfStream
    assert testParser123.flatMap(str("123") + endOfStream).tokensToString() == "123"

  block testParsingApi:
    # plus, +
    assert testAbc1Parser.flatMap(str("abc") + ch('1')).tokensToString() == "abc1"
    assert testParser123.flatMap(str("12").plus(digit)).tokensToString() == "123"

    # optional
    assert testParser123.flatMap(optional(ch('1'))).tokensToString() == "1"
    assert testParser123.flatMap(optional(ch('2'))).tokensToString() == ""

    # ignore
    assert testParser123.flatMap(ignore(ch('1'))).tokensToString() == ""

    # peek
    assert testParser123.flatMap(ch('1') + peekCurrent(ch('2')) + ch('2')).tokensToString() == "12"
    assert testParser123.flatMap(ch('1') + peekCurrent(ch('1'))).isErr() == true
    assert testParser123.flatMap(peekCurrent(startOfStream)).isOk() == true
    assert testParser123.flatMap(ch('1') + peekBack1(startOfStream)).isOk() == true

    # manyUntil
    assert testAbc1Parser.flatMap(manyUntil(anyCh, digit)).tokensToString() == "abc"

    # anyUntil
    assert testAbc1Parser.flatMap(anyUntil(digit)).tokensToString() == "abc"

    # choice
    assert testAbc1Parser.flatMap(choice(@[digit, ch('a')])).tokensToString() == "a"
    assert testAbc1Parser.flatMap(choice(@[digit])).error().kind == choiceMismatchErr

    # parse
    assert testAbc1Parser.flatMap(following(@[ch('a'), str("bc"), digit])).tokensToString() == "abc1"

  block testParsingHelpers:
    let testParenParser = initParserResult("(123)")
    let testQuoteParser = initParserResult("\"123\"")
    let testQuote = ch('"')
    let testBetweenParen = between(ch('('), ch(')'))
    let testBetweenQuotes = betweenPair(ch('"'))

    # between
    assert testParenParser.flatMap(testBetweenParen(str("123"))).tokensToString() == "123"
    assert testQuoteParser.flatMap(testBetweenQuotes(str("123"))).tokensToString() == "123"

    # anyBetween
    assert testParenParser.flatMap(anyBetween(ch('('), ch(')'))).tokensToString() == "123"
    assert testQuoteParser.flatMap(anyBetweenPair(testQuote)).tokensToString() == "123"

  block testParsingAliases:
    assert initParserResult("").flatMap(newlineOrEol).isOk() == true
    assert initParserResult("abc ").flatMap(str("abc") + newlineOrEol).error().expected == newlineEolExpectedErr
    assert initParserResult("").flatMap(whitespaceOrEol).isOk() == true
    assert initParserResult("abc ").flatMap(str("abc") + whitespaceOrEol + whitespaceOrEol).tokensToString() == "abc "

  block testImplementations:
    # End-to-end: two sentences separated by a blank line.
    let testSentenceStr = "This is a sentence\n\nFollowing another sentence."
    let testSentenceParser = initParserResult(testSentenceStr)

    let testSentence1 = following(@[anyUntil(newline), newline, newlineOrEol])
      .plus(manyUntil(choice(@[letter, whitespace]), ch('.')) + ch('.') + newlineOrEol)

    assert testSentenceParser.flatMap(testSentence1).tokensToString() == testSentenceStr
|
||||
0
src_v2/parser/parser_internals.nim
Normal file
0
src_v2/parser/parser_internals.nim
Normal file
353
src_v2/parser/parser_types.nim
Normal file
353
src_v2/parser/parser_types.nim
Normal file
@@ -0,0 +1,353 @@
|
||||
import std/[
|
||||
strutils,
|
||||
strformat,
|
||||
collections/sequtils,
|
||||
sugar,
|
||||
]
|
||||
import fp/[
|
||||
resultM,
|
||||
]
|
||||
import fusion/matching
|
||||
import ../utils/str
|
||||
|
||||
{.experimental: "caseStmtMacros".}  # enables `case` over tuples (fusion/matching)

type
  ParserState* = ref object
    ## Immutable-by-convention cursor over the input text.
    stream*: string                 # full input being parsed
    position*, lastPosition*: int   # current index and the one before it

  parserTokenCharValueT* = char
  parserTokenStringValueT* = string
  ParserTokenKind* = enum
    parserTokenChar
    parserTokenString
  ParserToken* = ref object
    ## A captured piece of input: a single char or an already-joined string.
    case kind*: ParserTokenKind
    of parserTokenChar:
      charValue*: parserTokenCharValueT
    of parserTokenString:
      stringValue*: parserTokenStringValueT

  Parser* = ref object
    ## Parse state plus the tokens captured so far.
    state*: ParserState
    tokens*: seq[ParserToken]
  ParserErrorKind* = enum
    choiceMismatchErr
    charMismatchErr
    endOfStringErr
    startOfStringErr
  ParserError* = ref object
    kind*: ParserErrorKind
    unexpected*: string   # what was found
    expected*: string     # what the parser wanted
    index*: int           # stream index where the failure happened
    parser*: Parser       # parser snapshot at failure, for diagnostics
  ParserResult* = Result[Parser, ParserError]

# Signature every parser combinator produces/consumes.
type parserFnT* = proc(t0: Parser): ParserResult
|
||||
|
||||
# -- Initalizers
|
||||
|
||||
func initParserToken*(x: char): ParserToken =
  ## Initialize `ParserToken` as `parserTokenChar` with value `x`.
  ParserToken(kind: parserTokenChar, charValue: x)
func initParserToken*(x: string): ParserToken =
  ## Initialize `ParserToken` as `parserTokenString` with value `x`.
  ParserToken(kind: parserTokenString, stringValue: x)
|
||||
|
||||
func initParser*(
  stream: string,
  tokens = newSeq[ParserToken](),
  position = -1,
  lastPosition = 0,
): Parser =
  ## Initialize a `Parser` over `stream`.
  ## `position` defaults to -1 ("before the first character") so the first
  ## `ch`/`str` call reads index 0.
  Parser(
    state: ParserState(
      stream: stream,
      position: position,
      lastPosition: lastPosition,
    ),
    tokens: tokens,
  )
|
||||
|
||||
func initParserResult*(
  stream: string,
  tokens = newSeq[ParserToken](),
  position = -1,
  lastPosition = 0,
): ParserResult =
  ## `initParser` wrapped in `ParserResult.ok`, ready for `flatMap` chains.
  initParser(stream, tokens, position, lastPosition).ok()
|
||||
|
||||
# -- Equalizers
|
||||
|
||||
func `==`*(a: ParserToken, b: ParserToken): bool =
  ## Equality for `ParserToken`: same kind and same payload.
  if a.kind != b.kind:
    false
  elif a.kind == parserTokenChar:
    a.charValue == b.charValue
  else:
    a.stringValue == b.stringValue
|
||||
|
||||
# -- Getters
|
||||
|
||||
func toString*(x: ParserToken): string =
  ## The token's payload rendered as a string.
  case x.kind
  of parserTokenChar: $x.charValue
  of parserTokenString: x.stringValue

func toString*(tokens: seq[ParserToken]): string =
  ## Concatenate every token's string payload, in order.
  for token in tokens:
    result.add token.toString()
|
||||
|
||||
# -- Modifiers
|
||||
|
||||
func flattenTokens*(parser: Parser): Parser =
  ## Collapse all tokens in `parser` into a single string `ParserToken`
  ## (parser state is shared, only the token list is replaced).
  let token = parser.tokens
    .foldl(a & b.toString(), "")
    .initParserToken()

  Parser(
    state: parser.state,
    tokens: @[token]
  )

func flattenTokensResult*(parser: Parser): ParserResult =
  ## `flattenTokens` wrapped in `ParserResult.ok`.
  parser
    .flattenTokens()
    .ok()
|
||||
|
||||
func emptyTokens*(parser: Parser): Parser =
  ## Copy of `parser` with the token list cleared (state is shared).
  Parser(state: parser.state, tokens: @[])
|
||||
|
||||
func foldTokens*[T](
  parserResult: ParserResult,
  onErrorFn: ParserError -> T,
  onSuccessFn: seq[ParserToken] -> T,
): T =
  ## Fold over the tokens inside `parserResult`: apply `onSuccessFn` to the
  ## tokens of an ok result, or `onErrorFn` to the `ParserError` otherwise.
  if parserResult.isOk():
    onSuccessFn(parserResult.unsafeGet().tokens)
  else:
    let err = parserResult.error()
    onErrorFn(err)
|
||||
|
||||
func tokensToString*(parserResult: ParserResult, fallback = ""): string =
|
||||
parserResult.foldTokens(
|
||||
err => fallback,
|
||||
xs => xs.toString(),
|
||||
)
|
||||
|
||||
func tokensToString*(tokens: seq[ParserToken]): string =
  ## Concatenate all tokens into one string; alias of `toString` for a
  ## token sequence, kept for API symmetry with the `ParserResult` overload.
  tokens.toString()
|
||||
|
||||
func setErrorExpectedField*(err: ParserError, expected: string): ParserError =
  ## Return a copy of `err` with the `expected` field replaced by
  ## `expected`; every other field is carried over unchanged.
  ParserError(
    kind: err.kind,
    unexpected: err.unexpected,
    expected: expected,
    index: err.index,
    parser: err.parser,
  )
|
||||
|
||||
# -- Stringifiers
|
||||
|
||||
func pprint*(x: ParserToken): string =
  ## Human-readable form of `x`. A token holding a newline is shown as
  ## the two characters "\n". The rendered string ends with a trailing
  ## newline (this mirrors the multi-line literal of the original).
  let raw = toString(x)
  let shown =
    if raw == "\n": "\\n"
    else: raw
  "ParserToken(\"" & shown & "\")\n"
|
||||
|
||||
func `$`*(x: ParserToken): string = pprint(x)
|
||||
|
||||
const LEFT_HIGHLIGHT_CHAR = ">>"
const RIGHT_HIGHLIGHT_CHAR = "<<"

func highlightStreamPosition(stream: string, position: int): string =
  ## Mark the character at `position` in `stream` by wrapping it in
  ## ">>" and "<<" markers. Out-of-range positions (negative or past the
  ## last index) return `stream` unchanged.
  if position < 0 or position >= stream.len:
    return stream

  let before = stream[0 ..< position]
  let marked = stream[position]
  let after = stream[position + 1 .. ^1]

  before & LEFT_HIGHLIGHT_CHAR & marked & RIGHT_HIGHLIGHT_CHAR & after
|
||||
|
||||
func highlightStreamPosition2(stream: string, position: int): string =
  ## Annotate `stream` with a caret-and-underline message pointing at the
  ## character at `position` (e.g. for parser error reports). Out-of-range
  ## positions return `stream` unchanged. Newlines get special handling so
  ## the indicator appears on the line the reader actually sees.
  if position < 0: return stream
  if position > stream.len - 1: return stream

  let ch = stream[position]

  # For a newline, widen the examined window by one character on each side
  # so the line-boundary searches below start from printable text.
  let (lineStartPosition, lineEndPosition) =
    case ch:
    of '\n':
      let lineStartPosition = (position - 1).max(0)
      let lineEndPosition = (position + 1).min(stream.len - 1)
      (lineStartPosition, lineEndPosition)
    else:
      (position, position)

  # Start of the current line: the character after the previous newline,
  # or 0 on the first line.
  # NOTE(review): rfind is called as rfind(sub, start, last) — confirm this
  # matches the strutils overload's intended argument order.
  let startIndex = stream.rfind("\n", 0, lineStartPosition)
  let lineStartIndex =
    case startIndex:
    of -1: 0
    else: startIndex + 1

  # End of the current line: the next newline, or stream.len.
  let endIndex = stream.find("\n", lineEndPosition)
  let lineEndIndex =
    case endIndex:
    of -1: stream.len
    else: endIndex

  # Padding up to the caret column, plus an underline that runs to the end
  # of the line with a fixed tail of 10 for visibility.
  let spaceChars = " ".repeat((position - lineStartIndex).max(0))
  let lineChars = "_".repeat((lineEndIndex - position).max(0) + 10)

  # Printable form of the highlighted character.
  let escapedChar =
    case ch:
    of '\n': "\\n"
    of ' ': "\\s"
    else: $ch

  let insertMessageAtIndex =
    case ch:
    # Print indicator for newlines on the previous line, which looks better for the reader
    of '\n': (lineEndIndex - 1).max(1)
    else: lineEndIndex

  let beforeNewline =
    case (startIndex, ch):
    # Always print newline for newline at the stream begin
    of (-1, '\n'): "\n"
    # Don't insert a newline when the character is a newline between newlines
    of (_, '\n'): ""
    else: "\n"

  # debugEcho "char: " & $ch
  # debugEcho "position: " & $position
  # debugEcho "startIndex: " & $lineStartIndex
  # debugEcho "endIndex: " & $lineEndIndex
  # debugEcho "insertMessageAtIndex: " & $insertMessageAtIndex

  # Build the annotated copy without mutating the input (sugar's `dup`).
  stream.dup(insert(&"{beforeNewline}{spaceChars}^{lineChars} Char at \"{escapedChar}\"\n", insertMessageAtIndex))
|
||||
|
||||
func `$`*(x: ParserState): string =
  ## Debug rendering of a `ParserState`; the stream is shown with the
  ## current position highlighted by `highlightStreamPosition`.
  ## NOTE(review): original in-literal indentation was lost in extraction —
  ## the literal is reproduced as seen.
  &"""ParserState(
stream: "{x.stream.highlightStreamPosition(x.position)}",
position: {x.position},
lastPosition: {x.lastPosition},
)"""
|
||||
|
||||
func `$`*(x: Parser): string =
  ## Debug rendering of a `Parser`; nested state and tokens are indented
  ## two spaces after their first line via `indentAfterNewline`.
  ## NOTE(review): original in-literal indentation was lost in extraction —
  ## the literal is reproduced as seen.
  &"""Parser(
state: {indentAfterNewline($x.state, 2)},
tokens: {indentAfterNewline($x.tokens, 2)},
)"""
|
||||
|
||||
func `$`*(x: ParserError): string =
  ## Human-readable rendering of a `ParserError`: the offending line of the
  ## source stream followed by a caret at the error position.
  ## Destructures the error variants with fusion/matching case patterns.
  ##
  ## Review fixes: the `choiceMismatchErr` branch printed
  ## "Character Mismatch Error" (copy-paste) and the start-of-string header
  ## read "StartOFString"; both messages are corrected, code is unchanged.
  case x:
  of charMismatchErr(expected: @expected, parser: @parser, index: @index, unexpected: @unexpected):
    # TODO: Only works for single line right now
    let original = parser.state.stream
      .deleteAfterNewline(parser.state.position)
    let errSpace = " ".repeat(max(0, index))

    &"""Parsing Error (Character Mismatch Error):
{original}
{errSpace}^ Expected '{expected}' but got '{unexpected}'"""
  of choiceMismatchErr(expected: @expected, parser: @parser, index: @index, unexpected: @unexpected):
    let original = parser.state.stream
      .deleteAfterNewline(parser.state.position)
    let errSpace = " ".repeat(max(0, index))

    &"""Parsing Error (Choice Mismatch Error):
{original}
{errSpace}^ Expected '{expected}' but got '{unexpected}'"""

  of endOfStringErr(parser: @parser, index: @index):
    let original = parser.state.stream
      .deleteAfterNewline(parser.state.position)
    let errSpace = " ".repeat(max(0, index))

    &"""Parsing Error (EndOfString Expected):
{original}
{errSpace}^ Expected 'EndOfString' at {index} but got {original.len - 1}"""

  of startOfStringErr(parser: @parser, index: @index):
    let original = parser.state.stream
      .deleteAfterNewline(parser.state.position)
    let errSpace = " ".repeat(max(0, index))

    &"""Parsing Error (StartOfString Expected):
{original}
{errSpace}^ Expected 'StartOfString' at index {index}"""

  else: "ParseError"
|
||||
|
||||
when isMainModule:
  # Self-tests, run only when this module is compiled as the main program
  # (e.g. `nim c -r <module>`).
  block testEqualizers:
    # Same-kind tokens compare by payload; mixed kinds are never equal.
    assert initParserToken("a") == initParserToken("a")
    assert initParserToken('a') == initParserToken('a')
    assert initParserToken("a") != initParserToken('a')

  block testGetters:
    # toString
    assert initParserToken("a").toString() == "a"
    assert initParserToken('a').toString() == "a"

  block testModifiers:
    # Fixture: a parser holding tokens "a", 'b', "c" over the stream "abc".
    let testTokensSeq = @[initParserToken("a"), initParserToken('b'), initParserToken("c")]
    let testExpectedStr = "abc"
    let testTokensSeqParser = initParser(
      stream = testExpectedStr,
      tokens = testTokensSeq,
      position = testTokensSeq.len - 1,
    )
    let testTokensSeqParserResult = ParserResult.ok(testTokensSeqParser)
    let testTokensSeqParserResultErr = ParserResult.err(ParserError(
      kind: charMismatchErr,
      unexpected: "a",
      expected: "b",
      index: 0,
      parser: testTokensSeqParser,
    ))

    # flattenTokens
    assert testTokensSeqParser.flattenTokens().tokens[0] == initParserToken("abc")

    # emptyTokens
    assert testTokensSeqParser.emptyTokens().tokens.len == 0

    # foldTokens
    assert testTokensSeqParserResult.foldTokens(
      err => "Failure",
      xs => xs.toString(),
    ) == testExpectedStr
    assert testTokensSeqParserResultErr.foldTokens(
      err => "Failure",
      xs => xs.toString(),
    ) == "Failure"

    # tokensToString
    assert testTokensSeqParserResult.tokensToString() == testExpectedStr
    assert testTokensSeq.tokensToString() == testExpectedStr
|
||||
37
src_v2/utils/fp.nim
Normal file
37
src_v2/utils/fp.nim
Normal file
@@ -0,0 +1,37 @@
|
||||
import std/sugar
|
||||
import fp/maybe
|
||||
import results
|
||||
|
||||
func last*[T](xs: seq[T]): Maybe[T] =
  ## The final element of `xs`, or `nothing` for an empty sequence.
  if xs.len > 0:
    just(xs[^1])
  else:
    nothing(T)
|
||||
|
||||
# Option-style predicate aliases so `Result` values can be queried with the
# same vocabulary as `Maybe`/`Option` values.
template isSome*(self: Result): bool = self.isOk()
template isNone*(self: Result): bool = self.isErr()
|
||||
|
||||
proc findMaybe*[T](xs: seq[T], fn: T -> bool): Maybe[T] =
  ## First element of `xs` satisfying predicate `fn`, if any.
  result = Nothing[T]()
  for candidate in xs:
    if fn(candidate):
      return Just(candidate)
|
||||
|
||||
proc findMaybeFn*[T, B](fns: seq[T {.nimcall.} -> Maybe[B]], val: T): Maybe[B] =
  ## Apply each function in `fns` to `val` in order and return the first
  ## defined (`Just`) result; `Nothing` when none of them match.
  result = Nothing[B]()
  for candidate in fns:
    let outcome = candidate(val)
    if outcome.isDefined():
      return outcome
|
||||
|
||||
proc notNegative*(x: Maybe[int]): Maybe[int] =
  ## Keep the value inside `x` only when it is non-negative (>= 0);
  ## negative values are mapped to `nothing`.
  ##
  ## Review fixes: the signature was `notNegative*[int](...)`, which
  ## declared a generic parameter *named* `int` that shadowed the builtin
  ## type; and the doc comment wrongly claimed a nil-to-nothing mapping.
  x.filter(i => i >= 0)
|
||||
|
||||
when isMainModule:
  # Smoke tests for the helpers above, run only when compiled as main.
  # findMaybeFn: the single candidate matches input 2, so this echoes Just("foo").
  echo @[
    (x: int) => (if x == 2: Just("foo") else: Nothing[string]()),
  ].findMaybeFn(2)

  # last: non-empty seq yields its final element; empty seq yields nothing.
  assert last(@[1,2,3]) == just(3)
  assert last[int](@[]) == nothing(int)
|
||||
15
src_v2/utils/parsec_test.nim
Normal file
15
src_v2/utils/parsec_test.nim
Normal file
@@ -0,0 +1,15 @@
|
||||
import sequtils
|
||||
import std/sugar
|
||||
import microparsec
|
||||
import results
|
||||
|
||||
# Prints a million-element seq.
# NOTE(review): looks like leftover benchmark/smoke output — confirm it is
# intentional before shipping; it produces several MB of stdout.
echo toSeq(1..1000000)

# let headlineParser = manyTill(ch '*', space)
# .flatMap((stars: seq[char]) => pure(stars))
# # .flatMap((stars) => manyTill(anyChar, endOfLine)
# # .flatMap(headline => pure(stars, headline))
# # )

# echo headlineParser.parse("*** Headline")
# echo headlineParser.parse("* Headline")
|
||||
28
src_v2/utils/parser.nim
Normal file
28
src_v2/utils/parser.nim
Normal file
@@ -0,0 +1,28 @@
|
||||
import std/parseutils
|
||||
|
||||
proc fastSubstr(s: string; token: var string; start, length: int) =
  ## Copy `length` characters of `s` beginning at `start` into `token`,
  ## reusing `token`'s existing storage instead of allocating a new string.
  token.setLen length
  var idx = 0
  while idx < length:
    token[idx] = s[start + idx]
    inc idx
||||
|
||||
proc parseUntilBackwards*(s: string, token: var string, until: string,
                          start = 0): int {.inline.} =
  ## Scan `s` backwards for the *last* occurrence of `until` and store
  ## everything from `start` up to (not including) that occurrence in
  ## `token`. Returns the number of parsed characters, or 0 on error
  ## (`until` empty, or no occurrence at or after `start`).
  ##
  ## Review fixes: the original started scanning at `s[s.len]` (index out
  ## of bounds on the very first iteration) and anchored the match on the
  ## *last* character of `until` while verifying forward from its second
  ## character, so a multi-character needle could never match; the
  ## runnableExamples also called `parseUntil` instead of this proc.
  runnableExamples:
    var myToken: string
    doAssert parseUntilBackwards("Hello World", myToken, "Wor") == 6
    doAssert myToken == "Hello "
    doAssert parseUntilBackwards("Hello World", myToken, "Wor", 2) == 4
    doAssert myToken == "llo "
  var i = s.len - 1
  while i >= 0:
    if until.len > 0 and s[i] == until[0]:
      # Anchor on the first character of `until`, then verify the rest.
      var u = 1
      while i + u < s.len and u < until.len and s[i + u] == until[u]:
        inc u
      if u >= until.len: break
    dec(i)
  if i < start:
    # Not found (or only before `start`): signal the error per the doc.
    token.setLen 0
    return 0
  result = i - start
  token = s[start ..< start + result]
|
||||
44
src_v2/utils/printers.nim
Normal file
44
src_v2/utils/printers.nim
Normal file
@@ -0,0 +1,44 @@
|
||||
import std/[
|
||||
options,
|
||||
sequtils,
|
||||
strformat,
|
||||
strutils,
|
||||
sugar,
|
||||
]
|
||||
|
||||
const INDENT_SIZE* = 2;
|
||||
|
||||
func stringifyFields*(
  xs: seq[tuple[
    field: string,
    value: string,
    print: bool
  ]],
  indent = 0,
): string =
  ## Join the printable entries of `xs` as "field: value" lines separated
  ## by ",\n".
  ## NOTE(review): `indent` is accepted but currently unused — confirm
  ## whether indentation was intended here.
  var parts: seq[string] = @[]
  for entry in xs:
    if entry.print:
      parts.add entry.field & ": " & $entry.value
  parts.join(",\n")
|
||||
|
||||
func stringifyBlock*(blockName: string, indent = 0, xs: varargs[string]): string =
  ## Render `xs` as a parenthesised, named block:
  ## "blockName(" / indented entries / ")", joined with newlines.
  let fieldIndent = indent + INDENT_SIZE
  var rendered = @[blockName & "("]
  for entry in xs:
    rendered.add entry.indent(fieldIndent)
  rendered.add ")"
  rendered.join("\n")
|
||||
|
||||
func stringifySeq*[T](xs: seq[T], stringifyFn: (T) -> string, indent = 0): string =
  ## Render `xs` as a "@[" / indented stringified elements / "]" block,
  ## elements separated by ",\n" and indented one extra level.
  let fieldIndent = indent + INDENT_SIZE
  let body = xs
    .mapIt(it.stringifyFn())
    .join(",\n")
    .indent(fieldIndent)
  ["@[", body, "]"].join("\n")
|
||||
60
src_v2/utils/str.nim
Normal file
60
src_v2/utils/str.nim
Normal file
@@ -0,0 +1,60 @@
|
||||
import std/[
|
||||
strutils,
|
||||
math,
|
||||
]
|
||||
import fusion/matching
|
||||
|
||||
{.experimental: "caseStmtMacros".}
|
||||
|
||||
func prettyExpectedSet*(x: set[char]): string =
  ## Pretty print value for a set `x` of characters: the well-known
  ## strutils character classes are rendered by name, anything else falls
  ## through to the default `$` rendering.
  ## NOTE(review): this `case` over set values relies on the experimental
  ## caseStmtMacros (fusion/matching) enabled at module top — confirm the
  ## patterns match by set equality, not subset.
  case x:
  of AllChars:
    "AllChars {'\x00'..'\xFF'}"
  of Digits:
    "Digits {'0'..'9'}"
  of HexDigits:
    "HexDigits {'0'..'9', 'A'..'F', 'a'..'f'}"
  of Letters:
    "Letters {'A'..'Z', 'a'..'z'}"
  of Newlines:
    "Newlines {'\r', '\n'}"
  of Whitespace:
    "Whitespace {' ', '\t', '\v', '\r', '\n', '\f'}"
  else:
    $x
|
||||
|
||||
func indentAfterNewline*(str: string, count: int): string =
  ## Indent the lines following the first line of `str` by `count` spaces.
  ## The first line keeps its original column, which makes the output
  ## suitable for nested keys in stringify functions.
  # Indent every line, then strip the padding added before the first one.
  var padded = str.indent(count)
  padded.delete(0 .. count - 1)
  padded
|
||||
|
||||
proc safeDelete*(str: string, slice: Slice[int]): string =
  ## Delete the characters `str[slice]`, clamping out-of-range bounds
  ## instead of raising. A start index past the end of `str` returns the
  ## string unchanged.
  if slice.a >= str.len:
    return str
  let lo = clamp(slice.a, 0, str.len)
  let hi = clamp(slice.b, 0, str.len - 1)
  result = str
  result.delete(lo .. hi)
|
||||
|
||||
func findAndDelete*(str: string, chars: set[char], start = 0, last = str.len - 1): string =
  ## Find the next occurrence of any of `chars` from `start`, and delete
  ## characters from there through `last`. Negative `start`/`last`
  ## (e.g. from an initial parser position) return `str` unchanged, as does
  ## a search with no match.
  if start < 0 or last < 0:
    return str
  let hit = str.find(chars, start, last)
  if hit == -1:
    return str
  str.safeDelete(hit .. last)
|
||||
|
||||
func deleteAfterNewline*(str: string, start = 0): string =
  ## Truncate `str` at the first newline found at or after `start`
  ## (the newline itself is deleted too).
  str.findAndDelete(Newlines, start)
|
||||
Reference in New Issue
Block a user